From d5921175088a5ae85b7e2d37619b56d6a0de8ce4 Mon Sep 17 00:00:00 2001 From: "awilliam@xenbuild.aw" Date: Tue, 24 Oct 2006 09:49:31 -0600 Subject: [PATCH] [IA64] Support multiple page sizes in VHPT Enable VHPT support for multiple page sizes. Signed-off-by: Anthony Xu --- xen/arch/ia64/vmx/vmmu.c | 8 ++++--- xen/arch/ia64/vmx/vmx_ivt.S | 16 ++++++++++---- xen/arch/ia64/vmx/vmx_phy_mode.c | 4 ++-- xen/arch/ia64/vmx/vmx_process.c | 4 ++-- xen/arch/ia64/vmx/vtlb.c | 34 ++++++++++++++++++++++------- xen/include/asm-ia64/mm.h | 4 ++++ xen/include/asm-ia64/vmmu.h | 3 ++- xen/include/asm-ia64/vmx_phy_mode.h | 2 +- 8 files changed, 54 insertions(+), 21 deletions(-) diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c index 17599de150..aaf4055674 100644 --- a/xen/arch/ia64/vmx/vmmu.c +++ b/xen/arch/ia64/vmx/vmmu.c @@ -341,9 +341,9 @@ fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle) ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2); return IA64_RETRY; } - mfn = tlb->ppn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT); maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) | (gip & (PSIZE(tlb->ps) - 1)); + mfn = maddr >> PAGE_SHIFT; } page = mfn_to_page(mfn); @@ -637,7 +637,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 vadr, u64 *padr) thash_data_t *data; ISR visr,pt_isr; REGS *regs; - u64 vhpt_adr; + u64 vhpt_adr, madr; IA64_PSR vpsr; regs=vcpu_regs(vcpu); pt_isr.val=VMX(vcpu,cr_isr); @@ -673,7 +673,9 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 vadr, u64 *padr) dnat_page_consumption(vcpu, vadr); return IA64_FAULT; }else{ - *padr = (get_gpfn_from_mfn(arch_to_xen_ppn(data->ppn)) << PAGE_SHIFT) | (vadr & (PAGE_SIZE - 1)); + madr = (data->ppn >> (data->ps - 12) << data->ps) | + (vadr & (PSIZE(data->ps) - 1)); + *padr = __mpa_to_gpa(madr); return IA64_NO_FAULT; } } diff --git a/xen/arch/ia64/vmx/vmx_ivt.S b/xen/arch/ia64/vmx/vmx_ivt.S index 625c1b01e5..484d3ebe66 100644 --- a/xen/arch/ia64/vmx/vmx_ivt.S +++ b/xen/arch/ia64/vmx/vmx_ivt.S @@ -172,13 +172,17 @@ vmx_itlb_loop: ld8 r27 = [r18] ld8 r29 = 
[r28] ;; - st8 [r16] = r29 - st8 [r28] = r22 + st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET + st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET extr.u r19 = r27, 56, 4 ;; + ld8 r29 = [r16] + ld8 r22 = [r28] dep r27 = r0, r27, 56, 4 dep r25 = r19, r25, 56, 4 ;; + st8 [r16] = r22 + st8 [r28] = r29 st8 [r18] = r25 st8 [r17] = r27 ;; @@ -246,13 +250,17 @@ vmx_dtlb_loop: ld8 r27 = [r18] ld8 r29 = [r28] ;; - st8 [r16] = r29 - st8 [r28] = r22 + st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET + st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET extr.u r19 = r27, 56, 4 ;; + ld8 r29 = [r16] + ld8 r22 = [r28] dep r27 = r0, r27, 56, 4 dep r25 = r19, r25, 56, 4 ;; + st8 [r16] = r22 + st8 [r28] = r29 st8 [r18] = r25 st8 [r17] = r27 ;; diff --git a/xen/arch/ia64/vmx/vmx_phy_mode.c b/xen/arch/ia64/vmx/vmx_phy_mode.c index d59e74e0e0..d3337516be 100644 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c @@ -107,7 +107,7 @@ physical_mode_init(VCPU *vcpu) extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *); void -physical_tlb_miss(VCPU *vcpu, u64 vadr) +physical_tlb_miss(VCPU *vcpu, u64 vadr, int type) { u64 pte; ia64_rr rr; @@ -117,7 +117,7 @@ physical_tlb_miss(VCPU *vcpu, u64 vadr) pte = pte | PHY_PAGE_UC; else pte = pte | PHY_PAGE_WB; - thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr); + thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr, type); return; } diff --git a/xen/arch/ia64/vmx/vmx_process.c b/xen/arch/ia64/vmx/vmx_process.c index a0caebfa88..7320beb9bb 100644 --- a/xen/arch/ia64/vmx/vmx_process.c +++ b/xen/arch/ia64/vmx/vmx_process.c @@ -288,7 +288,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs) return IA64_FAULT; } } - physical_tlb_miss(v, vadr); + physical_tlb_miss(v, vadr, type); return IA64_FAULT; } @@ -306,7 +306,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs) return IA64_FAULT; } } - thash_vhpt_insert(v,data->page_flags, data->itir ,vadr); + thash_vhpt_insert(v, data->page_flags, data->itir, vadr, 
type); }else if(type == DSIDE_TLB){ diff --git a/xen/arch/ia64/vmx/vtlb.c b/xen/arch/ia64/vmx/vtlb.c index 2e83358972..f85c9e63b4 100644 --- a/xen/arch/ia64/vmx/vtlb.c +++ b/xen/arch/ia64/vmx/vtlb.c @@ -178,11 +178,23 @@ static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa) return; } -void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va) +void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va, int type) { - u64 phy_pte; + u64 phy_pte, psr; + ia64_rr mrr; + + mrr.rrval = ia64_get_rr(va); phy_pte=translate_phy_pte(v, &pte, itir, va); - vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va); + + if (itir_ps(itir) >= mrr.ps) { + vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va); + } else { + phy_pte &= ~PAGE_FLAGS_RV_MASK; + psr = ia64_clear_ic(); + ia64_itc(type + 1, va, phy_pte, itir_ps(itir)); + ia64_set_psr(psr); + ia64_srlz_i(); + } } /* * vhpt lookup @@ -191,7 +203,7 @@ void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va) thash_data_t * vhpt_lookup(u64 va) { thash_data_t *hash, *head; - u64 tag, pte; + u64 tag, pte, itir; head = (thash_data_t *)ia64_thash(va); hash=head; tag = ia64_ttag(va); @@ -207,6 +219,9 @@ thash_data_t * vhpt_lookup(u64 va) tag = hash->etag; hash->etag = head->etag; head->etag = tag; + itir = hash->itir; + hash->itir = head->itir; + head->itir = itir; head->len = hash->len; hash->len=0; return head; @@ -223,7 +238,8 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte) if (data == NULL) { data = vtlb_lookup(current, iha, DSIDE_TLB); if (data != NULL) - thash_vhpt_insert(current, data->page_flags, data->itir ,iha); + thash_vhpt_insert(current, data->page_flags, data->itir, + iha, DSIDE_TLB); } asm volatile ("rsm psr.ic|psr.i;;" @@ -607,7 +623,8 @@ void thash_init(thash_cb_t *hcb, u64 sz) head=hcb->hash; num = (hcb->hash_sz/sizeof(thash_data_t)); do{ - head->itir = PAGE_SHIFT<<2; + head->page_flags = 0; + head->itir = 0; head->etag = 1UL<<63; head->next = 0; head++; @@ -617,11 +634,12 @@ void thash_init(thash_cb_t *hcb, 
u64 sz) hcb->cch_freelist = p = hcb->cch_buf; num = (hcb->cch_sz/sizeof(thash_data_t))-1; do{ - p->itir = PAGE_SHIFT<<2; + p->page_flags = 0; + p->itir = 0; p->next =p+1; p++; num--; }while(num); - p->itir = PAGE_SHIFT<<2; + p->itir = 0; p->next = NULL; } diff --git a/xen/include/asm-ia64/mm.h b/xen/include/asm-ia64/mm.h index ac665bcd0b..26266942b5 100644 --- a/xen/include/asm-ia64/mm.h +++ b/xen/include/asm-ia64/mm.h @@ -497,6 +497,10 @@ extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps, #define __gpa_to_mpa(_d, gpa) \ ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK)) +#define __mpa_to_gpa(madr) \ + ((get_gpfn_from_mfn((madr) >> PAGE_SHIFT) << PAGE_SHIFT) | \ + ((madr) & ~PAGE_MASK)) + /* Arch-specific portion of memory_op hypercall. */ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg); diff --git a/xen/include/asm-ia64/vmmu.h b/xen/include/asm-ia64/vmmu.h index ccd1ed5099..4b86a33711 100644 --- a/xen/include/asm-ia64/vmmu.h +++ b/xen/include/asm-ia64/vmmu.h @@ -305,7 +305,8 @@ extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma); extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref); extern void vtlb_insert(struct vcpu *vcpu, u64 pte, u64 itir, u64 va); extern u64 translate_phy_pte(struct vcpu *v, u64 *pte, u64 itir, u64 va); -extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa); +extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa, + int type); extern u64 guest_vhpt_lookup(u64 iha, u64 *pte); static inline void vmx_vcpu_set_tr (thash_data_t *trp, u64 pte, u64 itir, u64 va, u64 rid) diff --git a/xen/include/asm-ia64/vmx_phy_mode.h b/xen/include/asm-ia64/vmx_phy_mode.h index bf9afde404..fcc6e5b388 100644 --- a/xen/include/asm-ia64/vmx_phy_mode.h +++ b/xen/include/asm-ia64/vmx_phy_mode.h @@ -96,7 +96,7 @@ extern void prepare_if_physical_mode(VCPU *vcpu); extern void recover_if_physical_mode(VCPU *vcpu); extern void vmx_init_all_rr(VCPU *vcpu); extern void vmx_load_all_rr(VCPU *vcpu); -extern void physical_tlb_miss(VCPU *vcpu, 
u64 vadr); +extern void physical_tlb_miss(VCPU *vcpu, u64 vadr, int type); /* * No sanity check here, since all psr changes have been * checked in switch_mm_mode(). -- 2.30.2